if ( (v->vcpu_id = vcpu_id) != 0 )
{
- v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
+ v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
v->arch.perdomain_ptes =
d->arch.mm_perdomain_pt + (vcpu_id << PDPT_VCPU_SHIFT);
- v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
- l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
}
return v;
void arch_do_createdomain(struct vcpu *v)
{
struct domain *d = v->domain;
+ int vcpuid;
if ( is_idle_task(d) )
return;
set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
INVALID_M2P_ENTRY);
v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
- v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
- l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+
+ /*
+ * Map Xen segments into every VCPU's GDT, irrespective of whether every
+ * VCPU will actually be used. This avoids an NMI race during context
+ * switch: if we take an NMI after switching CR3 but before switching
+ * GDT, and the old VCPU# is invalid in the new domain, we would otherwise
+ * try to load CS from an invalid table.
+ */
+ for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
+ {
+ d->arch.mm_perdomain_pt[
+ (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] =
+ l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+ }
v->arch.guest_vtable = __linear_l2_table;
v->arch.shadow_vtable = __shadow_linear_l2_table;
static void __init start_of_day(void)
{
int i;
+ unsigned long vgdt;
early_cpu_init();
arch_do_createdomain(current);
- /* Map default GDT into their final position in the idle page table. */
- map_pages_to_xen(
- GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE,
- virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
+ /*
+ * Map default GDT into its final positions in the idle page table. As
+ * noted in arch_do_createdomain(), we must map for every possible VCPU#.
+ */
+ vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
+ for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ {
+ map_pages_to_xen(
+ vgdt, virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
+ vgdt += 1 << PDPT_VCPU_VA_SHIFT;
+ }
find_smp_config();